(p8) br.cond.spnt page_fault
#ifdef XEN
;;
+#ifdef CONFIG_VIRTUAL_FRAME_TABLE
+ // Test for the address of virtual frame_table
+ shr r22=r16,56;;
+ cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
+(p8) br.cond.sptk frametable_miss ;;
+#endif
// Test for Xen address, if not handle via page_fault
// note that 0xf000 (cached) and 0xe800 (uncached) addresses
// should be OK.
mov pr=r31,-1
rfi
END(alt_dtlb_miss)
+#ifdef CONFIG_VIRTUAL_FRAME_TABLE
+// Data-TLB miss handler for the virtually mapped frame table.
+// Walks the dedicated frametable_pg_dir page table (3 levels:
+// pgd/pmd/pte) using PHYSICAL data addressing, then installs the
+// resulting PTE with itc.d.  Any absent level, or a PTE with the
+// present bit clear, branches to frametable_fault instead.
+// NOTE(review): r16 is presumably cr.ifa (faulting address) set up by
+// the alt_dtlb_miss path above — confirm against the full IVT source.
+GLOBAL_ENTRY(frametable_miss)
+	rsm psr.dt // switch to using physical data addressing
+	movl r24=(frametable_pg_dir-PAGE_OFFSET) // r24=__pa(frametable_pg_dir)
+	;;
+	srlz.d // serialize the psr.dt change before the physical loads below
+	extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
+	;;
+	shladd r24=r17,3,r24 // r24=&pgd[pgd_offset(addr)]
+	;;
+	ld8 r24=[r24] // r24=pgd[pgd_offset(addr)]
+	extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3) // r18=pmd_offset
+	;;
+	cmp.eq p6,p7=0,r24 // pgd present?
+	shladd r24=r18,3,r24 // r24=&pmd[pmd_offset(addr)]
+	;;
+(p7)	ld8 r24=[r24] // r24=pmd[pmd_offset(addr)]
+	extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)// r19=pte_offset
+(p6)	br.spnt.few frametable_fault
+	;;
+	cmp.eq p6,p7=0,r24 // pmd present?
+	shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
+	;;
+(p7)	ld8 r24=[r24] // r24=pte[pte_offset(addr)]
+	mov r25=0x700|(_PAGE_SIZE_16K<<2) // key=7, 16K page size for the itir
+(p6)	br.spnt.few frametable_fault
+	;;
+	mov cr.itir=r25
+	ssm psr.dt // switch to using virtual data addressing
+	tbit.z p6,p7=r24,_PAGE_P_BIT // pte present?
+	;;
+(p7)	itc.d r24 // install updated PTE
+(p6)	br.spnt.few frametable_fault // page present bit cleared?
+	;;
+	mov pr=r31,-1 // restore predicate registers
+	rfi
+END(frametable_miss)
+// Taken when the frametable_miss walk finds no valid mapping.  If the
+// faulting instruction is the probe.r inside ia64_frametable_probe,
+// report "not mapped" by forcing r8=0 and resuming at the slot after
+// the probe (via the ipsr.ri bump); otherwise forward the miss to the
+// generic fault dispatcher as FAULT(4).
+// NOTE(review): r21 is presumably cr.ipsr saved by the surrounding IVT
+// prologue — confirm against the full handler.
+ENTRY(frametable_fault)
+	ssm psr.dt // switch to using virtual data addressing
+	mov r18=cr.iip
+	movl r19=ia64_frametable_probe
+	;;
+	cmp.eq p6,p7=r18,r19 // is faulting address ia64_frametable_probe?
+	mov r8=0 // assumes that 'probe.r' uses r8
+	dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instruction in bundle 2
+	;;
+(p6)	mov cr.ipsr=r21
+	mov r19=4 // FAULT(4)
+(p7)	br.spnt.few dispatch_to_fault_handler
+	;;
+	mov pr=r31,-1
+	rfi
+END(frametable_fault)
+// Probe read access to the address in r32.  probe.r leaves the result
+// in r8; if the access would fault, frametable_fault intercepts the
+// miss, zeroes r8 and resumes past this bundle.  The instruction slot
+// constraints noted below are relied on by frametable_fault's
+// iip-match and ipsr.ri fixup.
+GLOBAL_ENTRY(ia64_frametable_probe)
+	probe.r	r8=r32,0 // destination register must be r8
+	nop.f 0x0
+	br.ret.sptk.many b0 // this instruction must be in bundle 2
+END(ia64_frametable_probe)
+#endif /* CONFIG_VIRTUAL_FRAME_TABLE */
.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
#include <asm/pgtable.h>
#include <xen/mm.h>
-struct page_info *frame_table;
-unsigned long frame_table_size;
-unsigned long max_page;
+#ifdef CONFIG_VIRTUAL_FRAME_TABLE
+#include <linux/efi.h>
+#include <asm/pgalloc.h>
-struct page_info *mem_map;
-#define MAX_DMA_ADDRESS ~0UL // FIXME???
+extern pgd_t frametable_pg_dir[];
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long num_dma_physpages;
+#define frametable_pgd_offset(addr) \
+ (frametable_pg_dir + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
+static unsigned long table_size;
+static int opt_contig_mem = 0;
+boolean_param("contig_mem", opt_contig_mem);
+#else
+#define opt_contig_mem 1
#endif
+struct page_info *frame_table;
+unsigned long max_page;
+
/*
* Set up the page tables.
*/
unsigned long *mpt_table;
-unsigned long mpt_table_size;
+/*
+ * Set up the machine-to-physical (mpt) table for the contiguous-memory
+ * configuration.  When the virtual frame table is in use
+ * (opt_contig_mem == 0), mpt_table is built elsewhere (see
+ * init_virtual_frametable()) and this function returns immediately.
+ * NOTE(review): part of this function's body is elided by the diff
+ * context below.
+ */
void
paging_init (void)
{
	unsigned int mpt_order;
+	unsigned long mpt_table_size;
	unsigned long i;
+	/* Virtual frame table mode: nothing to build here. */
+	if (!opt_contig_mem) {
+		/* mpt_table is already allocated at this point. */
+		return;
+	}
+
	/* Create machine to physical mapping table
	 * NOTE: similar to frame table, later we may need virtually
	 * mapped mpt table if large hole exists. Also MAX_ORDER needs
}
}
+#ifdef CONFIG_VIRTUAL_FRAME_TABLE
+
+/*
+ * Allocate and zero one page from the boot allocator for use as a
+ * page-table directory level (pgd/pud/pmd).  Returns the PHYSICAL
+ * address of the page cast to void * -- note the memset goes through
+ * __va() while the raw physical address is what gets returned, as the
+ * *_populate() callers below expect.  Panics on allocation failure and
+ * bumps table_size for the statistics printed by
+ * init_virtual_frametable().
+ */
+static inline void *
+alloc_dir_page(void)
+{
+	unsigned long mfn = alloc_boot_pages(1, 1);
+	unsigned long dir;
+	if (!mfn)
+		panic("Not enough memory for virtual frame table!\n");
+	++table_size;
+	dir = mfn << PAGE_SHIFT;
+	memset(__va(dir), 0, PAGE_SIZE); /* zero via its virtual alias */
+	return (void *)dir;
+}
+
+/*
+ * Allocate one leaf (data) page from the boot allocator and fill every
+ * unsigned-long slot in it with 'fill' (0 for frame-table pages,
+ * INVALID_M2P_ENTRY for mpt-table pages).  Returns the page's mfn,
+ * ready to be handed to pfn_pte().  Panics on allocation failure and
+ * bumps table_size.
+ */
+static inline unsigned long
+alloc_table_page(unsigned long fill)
+{
+	unsigned long mfn = alloc_boot_pages(1, 1);
+	unsigned long *table;
+	unsigned long i;
+	if (!mfn)
+		panic("Not enough memory for virtual frame table!\n");
+	++table_size;
+	table = (unsigned long *)__va((mfn << PAGE_SHIFT));
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
+		table[i] = fill;
+	return mfn;
+}
+
+/*
+ * efi_memmap_walk() callback: back the slice of the virtual
+ * frame_table that describes physical range [start, end) with real,
+ * zero-filled pages.  Walks the dedicated frametable_pg_dir through
+ * pgd/pud/pmd/pte, allocating any level that is missing.  'arg' is
+ * unused.  Always returns 0 so the memory-map walk continues.
+ */
+int
+create_frametable_page_table (u64 start, u64 end, void *arg)
+{
+	unsigned long address, start_page, end_page;
+	struct page_info *map_start, *map_end;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	/* Virtual addresses of the page_info entries covering the range. */
+	map_start = frame_table + (__pa(start) >> PAGE_SHIFT);
+	map_end   = frame_table + (__pa(end) >> PAGE_SHIFT);
+
+	/* Expand to whole pages of the frame table itself. */
+	start_page = (unsigned long) map_start & PAGE_MASK;
+	end_page = PAGE_ALIGN((unsigned long) map_end);
+
+	for (address = start_page; address < end_page; address += PAGE_SIZE) {
+		pgd = frametable_pgd_offset(address);
+		if (pgd_none(*pgd))
+			pgd_populate(NULL, pgd, alloc_dir_page());
+		pud = pud_offset(pgd, address);
+
+		if (pud_none(*pud))
+			pud_populate(NULL, pud, alloc_dir_page());
+		pmd = pmd_offset(pud, address);
+
+		if (pmd_none(*pmd))
+			pmd_populate_kernel(NULL, pmd, alloc_dir_page());
+		pte = pte_offset_kernel(pmd, address);
+
+		if (pte_none(*pte))
+			set_pte(pte, pfn_pte(alloc_table_page(0), PAGE_KERNEL));
+	}
+	return 0;
+}
+
+/*
+ * efi_memmap_walk() callback: back the slice of the virtual mpt_table
+ * that describes physical range [start, end), mirroring
+ * create_frametable_page_table() but filling new leaf pages with
+ * INVALID_M2P_ENTRY so unpopulated m2p slots read as invalid.  'arg'
+ * is unused.  Always returns 0 so the memory-map walk continues.
+ */
+int
+create_mpttable_page_table (u64 start, u64 end, void *arg)
+{
+	unsigned long address, start_page, end_page;
+	unsigned long *map_start, *map_end;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	/* Virtual addresses of the m2p entries covering the range. */
+	map_start = mpt_table + (__pa(start) >> PAGE_SHIFT);
+	map_end   = mpt_table + (__pa(end) >> PAGE_SHIFT);
+
+	/* Expand to whole pages of the mpt table itself. */
+	start_page = (unsigned long) map_start & PAGE_MASK;
+	end_page = PAGE_ALIGN((unsigned long) map_end);
+
+	for (address = start_page; address < end_page; address += PAGE_SIZE) {
+		pgd = frametable_pgd_offset(address);
+		if (pgd_none(*pgd))
+			pgd_populate(NULL, pgd, alloc_dir_page());
+		pud = pud_offset(pgd, address);
+
+		if (pud_none(*pud))
+			pud_populate(NULL, pud, alloc_dir_page());
+		pmd = pmd_offset(pud, address);
+
+		if (pmd_none(*pmd))
+			pmd_populate_kernel(NULL, pmd, alloc_dir_page());
+		pte = pte_offset_kernel(pmd, address);
+
+		if (pte_none(*pte))
+			set_pte(pte, pfn_pte(alloc_table_page(INVALID_M2P_ENTRY), PAGE_KERNEL));
+	}
+	return 0;
+}
+
+/*
+ * Build the virtually mapped frame_table and machine-to-physical
+ * (mpt) table: anchor both at fixed virtual addresses, then let an
+ * EFI memory-map walk populate only the parts that describe real
+ * memory (holes stay unmapped; see ia64_mfn_valid()).
+ */
+void init_virtual_frametable(void)
+{
+	/* Allocate virtual frame_table */
+	frame_table = (struct page_info *) VIRT_FRAME_TABLE_ADDR;
+	table_size = 0;
+	efi_memmap_walk(create_frametable_page_table, NULL);
+
+	printk("size of virtual frame_table: %lukB\n",
+	       ((table_size << PAGE_SHIFT) >> 10));
+
+	/* Allocate virtual mpt_table.  Pointer arithmetic in unsigned
+	 * long units: place the table so its last entry (index
+	 * max_page - 1) ends exactly at VIRT_FRAME_TABLE_END. */
+	table_size = 0;
+	mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
+	efi_memmap_walk(create_mpttable_page_table, NULL);
+
+	printk("virtual machine to physical table: %p size: %lukB\n"
+	       "max_page: 0x%lx\n",
+	       mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);
+}
+
+/*
+ * Return nonzero if mfn 'pfn' has a mapped struct page_info in the
+ * frame table.  Trivially true for a contiguous frame table.  For the
+ * virtual frame table, probe both the first and the last byte of the
+ * entry, since a struct page_info may straddle a page boundary with
+ * only one side mapped.
+ */
+int
+ia64_mfn_valid (unsigned long pfn)
+{
+	extern long ia64_frametable_probe(unsigned long);
+	struct page_info *pg;
+	int valid;
+
+	if (opt_contig_mem)
+		return 1;
+	pg = mfn_to_page(pfn);
+	valid = ia64_frametable_probe((unsigned long)pg);
+	/* also probe the last byte of the page_info, which may lie on a
+	 * different (possibly unmapped) page */
+	if (valid)
+		valid = ia64_frametable_probe((unsigned long)(pg+1)-1);
+	return valid;
+}
+
+EXPORT_SYMBOL(ia64_mfn_valid);
+
+#endif /* CONFIG_VIRTUAL_FRAME_TABLE */
+
/* FIXME: postpone support to machines with big holes between physical memorys.
* Current hack allows only efi memdesc upto 4G place. (See efi.c)
*/
-#ifndef CONFIG_VIRTUAL_MEM_MAP
#define FT_ALIGN_SIZE (16UL << 20)
+/*
+ * Set up frame_table.  With CONFIG_VIRTUAL_FRAME_TABLE and contig_mem
+ * disabled this delegates to init_virtual_frametable() and returns.
+ * Otherwise the contiguous table's size is computed and logged here
+ * (the allocation itself is not visible in this hunk -- diff context
+ * elided).
+ */
void __init init_frametable(void)
{
	unsigned long pfn;
+	unsigned long frame_table_size;
+
+#ifdef CONFIG_VIRTUAL_FRAME_TABLE
+	/* Virtual frame table mode: build sparse tables instead. */
+	if (!opt_contig_mem) {
+		init_virtual_frametable();
+		return;
+	}
+#endif
+
	frame_table_size = max_page * sizeof(struct page_info);
	frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
	printk("size of frame_table: %lukB\n",
	frame_table_size >> 10);
}
-#endif